if ( svc->cur_budget <= 0 )
{
svc->cur_budget = 0;
- set_bit(__RTDS_depleted, &svc->flags);
+ __set_bit(__RTDS_depleted, &svc->flags);
}
/* TRACE */
if ( snext != scurr &&
!is_idle_vcpu(current) &&
vcpu_runnable(current) )
- set_bit(__RTDS_delayed_runq_add, &scurr->flags);
+ __set_bit(__RTDS_delayed_runq_add, &scurr->flags);
snext->last_start = now;
ret.time = -1; /* if an idle vcpu is picked */
if ( snext != scurr )
{
q_remove(snext);
- set_bit(__RTDS_scheduled, &snext->flags);
+ __set_bit(__RTDS_scheduled, &snext->flags);
}
if ( snext->vcpu->processor != cpu )
{
replq_remove(ops, svc);
}
else if ( svc->flags & RTDS_delayed_runq_add )
- clear_bit(__RTDS_delayed_runq_add, &svc->flags);
+ __clear_bit(__RTDS_delayed_runq_add, &svc->flags);
}
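/*
 * A minimal sketch of what the non-atomic bitop variants boil down to,
 * assuming (as the RTDS code paths above do) that every access to
 * svc->flags is already serialized by the scheduler lock.  The example_*
 * names below are invented for illustration; this is not Xen's actual
 * bitops implementation, only the plain read-modify-write idea behind
 * __set_bit()/__clear_bit().
 */
#include <limits.h>

#define EXAMPLE_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Set bit 'nr' with an ordinary OR: no atomicity, no barrier.  Correct
 * only while the caller holds the lock that protects *addr. */
static inline void example_set_bit(unsigned int nr, unsigned long *addr)
{
    addr[nr / EXAMPLE_BITS_PER_LONG] |= 1UL << (nr % EXAMPLE_BITS_PER_LONG);
}

/* Clear bit 'nr' the same way; the same locking requirement applies. */
static inline void example_clear_bit(unsigned int nr, unsigned long *addr)
{
    addr[nr / EXAMPLE_BITS_PER_LONG] &= ~(1UL << (nr % EXAMPLE_BITS_PER_LONG));
}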
/*
*/
if ( unlikely(svc->flags & RTDS_scheduled) )
{
- set_bit(__RTDS_delayed_runq_add, &svc->flags);
+ __set_bit(__RTDS_delayed_runq_add, &svc->flags);
/*
 * The vcpu is waking up already, and we didn't even have the time to
* remove its next replenishment event from the replenishment queue
struct rt_vcpu *svc = rt_vcpu(vc);
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
- clear_bit(__RTDS_scheduled, &svc->flags);
+ __clear_bit(__RTDS_scheduled, &svc->flags);
/* do not insert the idle vcpu into the runq */
if ( is_idle_vcpu(vc) )
goto out;
- if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
+ if ( __test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
likely(vcpu_runnable(vc)) )
{
runq_insert(ops, svc);
runq_tickle(ops, next_on_runq);
}
else if ( vcpu_on_q(svc) &&
- test_and_clear_bit(__RTDS_depleted, &svc->flags) )
+ __test_and_clear_bit(__RTDS_depleted, &svc->flags) )
runq_tickle(ops, svc);
list_del(&svc->replq_elem);
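/*
 * Sketch of the usage pattern the wakeup/replenishment paths above rely
 * on: test-and-clear a flag with plain loads and stores while the
 * scheduler lock is held.  Everything here (example_*) is a hypothetical
 * illustration, not Xen code; it only shows why no atomic instruction is
 * needed once the lock serializes all writers of the flags word.
 */
#include <stdbool.h>
#include <pthread.h>

#define EXAMPLE_DEPLETED_BIT 0

struct example_vcpu {
    pthread_mutex_t lock;   /* stands in for the per-runqueue scheduler lock */
    unsigned long flags;    /* only touched with 'lock' held, like svc->flags */
};

/* Non-atomic test-and-clear: plain read, plain write. */
static bool example_test_and_clear_bit(unsigned int nr, unsigned long *addr)
{
    unsigned long mask = 1UL << nr;
    bool was_set = (*addr & mask) != 0;

    *addr &= ~mask;
    return was_set;
}

/* Caller-side pattern: take the lock, then the plain bitop is safe. */
static void example_replenish(struct example_vcpu *v)
{
    pthread_mutex_lock(&v->lock);
    if ( example_test_and_clear_bit(EXAMPLE_DEPLETED_BIT, &v->flags) )
    {
        /* ...re-insert/tickle, analogous to runq_tickle() above... */
    }
    pthread_mutex_unlock(&v->lock);
}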